[IA64] use pte_pfn() where possible
author    Isaku Yamahata <yamahata@valinux.co.jp>
Thu, 6 Nov 2008 03:14:57 +0000 (12:14 +0900)
committer Isaku Yamahata <yamahata@valinux.co.jp>
Thu, 6 Nov 2008 03:14:57 +0000 (12:14 +0900)
use pte_pfn() to get mfn from pte.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
xen/arch/ia64/vmx/sioemu.c
xen/arch/ia64/vmx/vmmu.c
xen/arch/ia64/vmx/vmx_fault.c
xen/arch/ia64/vmx/vmx_init.c
xen/arch/ia64/vmx/vtlb.c
xen/arch/ia64/xen/dom0_ops.c

index 526d8c087f4c61206713faaf9619c1a4cffcb562..8c3c8b373c8407fd7bc9e5091dbb50a2ddb4fd40 100644 (file)
@@ -148,7 +148,7 @@ sioemu_set_callback (struct vcpu *v, unsigned long cb_ip, unsigned long paddr)
     pte = *lookup_noalloc_domain_pte(v->domain, paddr);
     if (!pte_present(pte) || !pte_mem(pte))
         return -EINVAL;
-    mfn = (pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT;
+    mfn = pte_pfn(pte);
     ASSERT(mfn_valid(mfn));
 
     page = mfn_to_page(mfn);
index 3bf05290515583c6e64e5d496eb5944c4c81d090..29f73be647cb5f1466e74b884d1547356411be7d 100644 (file)
@@ -312,7 +312,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
     */   
     if (ps != _PAGE_SIZE_16M)
         thash_purge_entries(vcpu, va, ps);
-    gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
+    gpfn = pte_pfn(__pte(pte));
     vcpu_get_rr(vcpu, va, &rid);
     rid &= RR_RID_MASK;
     p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
index e1365aa4a05cbd73c94a4b4be4ed2951b7e3b764..82ecb14d370a6c784df7f019658daf20bcf519bd 100644 (file)
@@ -376,7 +376,7 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* regs)
                 pte = lookup_domain_mpa(v->domain, pa_clear_uc(vadr), NULL);
                 if (v->domain != dom0 && (pte & _PAGE_IO)) {
                     emulate_io_inst(v, pa_clear_uc(vadr), 4,
-                                    (pte & _PFN_MASK) >> PAGE_SHIFT);
+                                    pte_pfn(__pte(pte)));
                     return IA64_FAULT;
                 }
                 physical_tlb_miss(v, vadr, type);
@@ -413,7 +413,7 @@ try_again:
                                  " pte=0x%lx\n", data->page_flags);
                 if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
                     emulate_io_inst(v, gppa, data->ma, 
-                                    (pte & _PFN_MASK) >> PAGE_SHIFT);
+                                    pte_pfn(__pte(pte)));
                 else {
                     vcpu_set_isr(v, misr.val);
                     data_access_rights(v, vadr);
index 00c23ff168100628f2c5462f045e40ae24d0e96e..8708d819fe6243f3c73cd51fe9077e1af5f717f3 100644 (file)
@@ -457,7 +457,7 @@ int vmx_set_ioreq_page(
        pte = *lookup_noalloc_domain_pte(d, gpfn << PAGE_SHIFT);
        if (!pte_present(pte) || !pte_mem(pte))
                return -EINVAL;
-       mfn = (pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT;
+       mfn = pte_pfn(pte);
        ASSERT(mfn_valid(mfn));
 
        page = mfn_to_page(mfn);
index d4651507227fa43297fbc853fa5b5cf3c7af5f7f..cab4b86e0f58aaaf19ce077a58acd69e9311b4ba 100644 (file)
@@ -522,8 +522,7 @@ static u64 translate_phy_pte(VCPU *v, u64 pte, u64 itir, u64 va)
      * which is required by vga acceleration since qemu maps shared
      * vram buffer with WB.
      */
-    if (mfn_valid((maddr & _PAGE_PPN_MASK) >> PAGE_SHIFT)
-                               && phy_pte.ma != VA_MATTR_NATPAGE)
+    if (mfn_valid(pte_pfn(__pte(pte))) && phy_pte.ma != VA_MATTR_NATPAGE)
         phy_pte.ma = VA_MATTR_WB;
 
     maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
index c986a35216ba15bcd4fe883e815d8b16e2720c07..c8f2d8ef74a1281bd8fe12192b5b3973aeb8da43 100644 (file)
@@ -800,7 +800,7 @@ do_dom0vp_op(unsigned long cmd,
             dprintk(XENLOG_INFO, "%s: INVALID_MFN ret: 0x%lx\n",
                      __func__, ret);
         } else {
-            ret = (ret & _PFN_MASK) >> PAGE_SHIFT;//XXX pte_pfn()
+            ret = pte_pfn(__pte(ret));
         }
         perfc_incr(dom0vp_phystomach);
         break;